}
/* --------------------- */
+/* XXX application state */
+static long nr_pages = 0;
+static unsigned long *page_array = NULL;
+
+/*
+ * Map the guest page containing 'guest_va' into this process, for a
+ * guest using 2-level (non-PAE) 32-bit paging.  Walks the guest page
+ * directory and page table via xc_map_foreign_range(), keeping a
+ * per-vcpu cache of the last CR3/PDE/data-page mappings so repeated
+ * look-ups near the same address avoid remapping.
+ * Returns a pointer valid until the next call for the same vcpu, or
+ * NULL on failure (CR3 not set yet, empty PDE/PTE, or map failure).
+ */
+static void *
+map_domain_va_32(
+ int xc_handle,
+ int cpu,
+ void *guest_va,
+ int perm)
+{
+ unsigned long pde, page;
+ unsigned long va = (unsigned long)guest_va;
+
+ /* Per-vcpu cache: machine address and local mapping for each level. */
+ static unsigned long cr3_phys[MAX_VIRT_CPUS];
+ static uint32_t *cr3_virt[MAX_VIRT_CPUS];
+ static unsigned long pde_phys[MAX_VIRT_CPUS];
+ static uint32_t *pde_virt[MAX_VIRT_CPUS];
+ static unsigned long page_phys[MAX_VIRT_CPUS];
+ static uint32_t *page_virt[MAX_VIRT_CPUS];
+ static int prev_perm[MAX_VIRT_CPUS];
+
+ if (ctxt[cpu].ctrlreg[3] == 0)
+ return NULL;
+ /* Remap the page directory only when the guest's CR3 has changed. */
+ if ( ctxt[cpu].ctrlreg[3] != cr3_phys[cpu] )
+ {
+ cr3_phys[cpu] = ctxt[cpu].ctrlreg[3];
+ if ( cr3_virt[cpu] )
+ munmap(cr3_virt[cpu], PAGE_SIZE);
+ cr3_virt[cpu] = xc_map_foreign_range(
+ xc_handle, current_domid, PAGE_SIZE, PROT_READ,
+ cr3_phys[cpu] >> PAGE_SHIFT);
+ if ( cr3_virt[cpu] == NULL )
+ return NULL;
+ }
+ if ( (pde = cr3_virt[cpu][vtopdi(va)]) == 0 )
+ return NULL;
+ /* For a paging-enabled HVM guest the PDE holds a guest frame number;
+ * translate it through page_array (presumably gfn -> mfn -- confirm). */
+ if ( (ctxt[cpu].flags & VGCF_HVM_GUEST) && paging_enabled(&ctxt[cpu]) )
+ pde = page_array[pde >> PAGE_SHIFT] << PAGE_SHIFT;
+ /* Remap the page table only when the PDE frame has changed. */
+ if ( pde != pde_phys[cpu] )
+ {
+ pde_phys[cpu] = pde;
+ if ( pde_virt[cpu] )
+ munmap(pde_virt[cpu], PAGE_SIZE);
+ pde_virt[cpu] = xc_map_foreign_range(
+ xc_handle, current_domid, PAGE_SIZE, PROT_READ,
+ pde_phys[cpu] >> PAGE_SHIFT);
+ if ( pde_virt[cpu] == NULL )
+ return NULL;
+ }
+ if ( (page = pde_virt[cpu][vtopti(va)]) == 0 )
+ return NULL;
+ /* NOTE(review): unlike the PDE translation above, this one is not
+ * gated on paging_enabled() -- confirm the asymmetry is intended. */
+ if (ctxt[cpu].flags & VGCF_HVM_GUEST)
+ page = page_array[page >> PAGE_SHIFT] << PAGE_SHIFT;
+ /* Remap the data page when the frame or requested protection changed. */
+ if ( (page != page_phys[cpu]) || (perm != prev_perm[cpu]) )
+ {
+ page_phys[cpu] = page;
+ if ( page_virt[cpu] )
+ munmap(page_virt[cpu], PAGE_SIZE);
+ page_virt[cpu] = xc_map_foreign_range(
+ xc_handle, current_domid, PAGE_SIZE, perm,
+ page_phys[cpu] >> PAGE_SHIFT);
+ if ( page_virt[cpu] == NULL )
+ {
+ /* Invalidate the cache entry so a retry remaps cleanly. */
+ page_phys[cpu] = 0;
+ return NULL;
+ }
+ prev_perm[cpu] = perm;
+ }
+
+ /* Combine the mapped page with the offset-in-page bits of va
+ * (BSD_PAGE_MASK is presumably PAGE_SIZE - 1 -- confirm). */
+ return (void *)(((unsigned long)page_virt[cpu]) | (va & BSD_PAGE_MASK));
+}
+
static void *
map_domain_va_pae(
uint64_t *l3, *l2, *l1;
static void *v;
- if (fetch_regs(xc_handle, cpu, NULL))
- return NULL;
-
l3 = xc_map_foreign_range(
xc_handle, current_domid, PAGE_SIZE, PROT_READ, ctxt[cpu].ctrlreg[3] >> PAGE_SHIFT);
if ( l3 == NULL )
return NULL;
l2p = l3[l3_table_offset_pae(va)] >> PAGE_SHIFT;
+ l2p = page_array[l2p];
l2 = xc_map_foreign_range(xc_handle, current_domid, PAGE_SIZE, PROT_READ, l2p);
+ munmap(l3, PAGE_SIZE);
if ( l2 == NULL )
return NULL;
l1p = l2[l2_table_offset_pae(va)] >> PAGE_SHIFT;
+ l1p = page_array[l1p];
l1 = xc_map_foreign_range(xc_handle, current_domid, PAGE_SIZE, perm, l1p);
+ munmap(l2, PAGE_SIZE);
if ( l1 == NULL )
return NULL;
p = l1[l1_table_offset_pae(va)] >> PAGE_SHIFT;
+ p = page_array[p];
if ( v != NULL )
munmap(v, PAGE_SIZE);
v = xc_map_foreign_range(xc_handle, current_domid, PAGE_SIZE, perm, p);
+ munmap(l1, PAGE_SIZE);
if ( v == NULL )
return NULL;
#ifdef __x86_64__
static void *
-map_domain_va(
+map_domain_va_64(
int xc_handle,
int cpu,
void *guest_va,
int perm)
{
- unsigned long l3p, l2p, l1p, p, va = (unsigned long)guest_va;
+ unsigned long l3p, l2p, l1p, l1e, p, va = (unsigned long)guest_va;
uint64_t *l4, *l3, *l2, *l1;
static void *v;
if ((ctxt[cpu].ctrlreg[4] & 0x20) == 0 ) /* legacy ia32 mode */
- return map_domain_va_pae(xc_handle, cpu, guest_va, perm);
-
- if (fetch_regs(xc_handle, cpu, NULL))
- return NULL;
+ return map_domain_va_32(xc_handle, cpu, guest_va, perm);
- l4 = xc_map_foreign_range(
- xc_handle, current_domid, PAGE_SIZE, PROT_READ, ctxt[cpu].ctrlreg[3] >> PAGE_SHIFT);
+ l4 = xc_map_foreign_range( xc_handle, current_domid, PAGE_SIZE,
+ PROT_READ, ctxt[cpu].ctrlreg[3] >> PAGE_SHIFT);
if ( l4 == NULL )
return NULL;
l3p = l4[l4_table_offset(va)] >> PAGE_SHIFT;
+ l3p = page_array[l3p];
l3 = xc_map_foreign_range(xc_handle, current_domid, PAGE_SIZE, PROT_READ, l3p);
+ munmap(l4, PAGE_SIZE);
if ( l3 == NULL )
return NULL;
l2p = l3[l3_table_offset(va)] >> PAGE_SHIFT;
+ l2p = page_array[l2p];
l2 = xc_map_foreign_range(xc_handle, current_domid, PAGE_SIZE, PROT_READ, l2p);
+ munmap(l3, PAGE_SIZE);
if ( l2 == NULL )
return NULL;
- l1p = l2[l2_table_offset(va)] >> PAGE_SHIFT;
- l1 = xc_map_foreign_range(xc_handle, current_domid, PAGE_SIZE, perm, l1p);
- if ( l1 == NULL )
- return NULL;
+ l1 = NULL;
+ l1e = l2[l2_table_offset(va)];
+ l1p = l1e >> PAGE_SHIFT;
+ if (l1e & 0x80) { /* 2M pages */
+ p = (l1p + l1_table_offset(va));
+ } else { /* 4K pages */
+ l1p = page_array[l1p];
+ l1 = xc_map_foreign_range(xc_handle, current_domid, PAGE_SIZE, perm, l1p);
+ munmap(l2, PAGE_SIZE);
+ if ( l1 == NULL )
+ return NULL;
- p = l1[l1_table_offset(va)] >> PAGE_SHIFT;
+ p = l1[l1_table_offset(va)] >> PAGE_SHIFT;
+ }
+ p = page_array[p];
if ( v != NULL )
munmap(v, PAGE_SIZE);
v = xc_map_foreign_range(xc_handle, current_domid, PAGE_SIZE, perm, p);
+ if (l1)
+ munmap(l1, PAGE_SIZE);
if ( v == NULL )
return NULL;
}
#endif
-#ifdef __i386__
-/* XXX application state */
-static long nr_pages = 0;
-static unsigned long *page_array = NULL;
-
static void *
map_domain_va(
int xc_handle,
void *guest_va,
int perm)
{
-
- unsigned long pde, page;
- unsigned long va = (unsigned long)guest_va;
+ unsigned long va = (unsigned long) guest_va;
long npgs = xc_get_tot_pages(xc_handle, current_domid);
-
-
- static uint32_t cr3_phys[MAX_VIRT_CPUS];
- static unsigned long *cr3_virt[MAX_VIRT_CPUS];
- static unsigned long pde_phys[MAX_VIRT_CPUS];
- static unsigned long *pde_virt[MAX_VIRT_CPUS];
- static unsigned long page_phys[MAX_VIRT_CPUS];
- static unsigned long *page_virt[MAX_VIRT_CPUS];
- static int prev_perm[MAX_VIRT_CPUS];
- static enum { MODE_UNKNOWN, MODE_32, MODE_PAE, MODE_64 } mode;
+ static enum { MODE_UNKNOWN, MODE_64, MODE_32, MODE_PAE } mode;
if ( mode == MODE_UNKNOWN )
{
mode = MODE_32;
}
- if ( mode == MODE_PAE )
- return map_domain_va_pae(xc_handle, cpu, guest_va, perm);
-
if ( nr_pages != npgs )
{
if ( nr_pages > 0 )
if (fetch_regs(xc_handle, cpu, NULL))
return NULL;
- if (paging_enabled(&ctxt[cpu])) {
- if ( ctxt[cpu].ctrlreg[3] != cr3_phys[cpu] )
- {
- cr3_phys[cpu] = ctxt[cpu].ctrlreg[3];
- if ( cr3_virt[cpu] )
- munmap(cr3_virt[cpu], PAGE_SIZE);
- cr3_virt[cpu] = xc_map_foreign_range(
- xc_handle, current_domid, PAGE_SIZE, PROT_READ,
- cr3_phys[cpu] >> PAGE_SHIFT);
- if ( cr3_virt[cpu] == NULL )
- return NULL;
- }
- if ( (pde = cr3_virt[cpu][vtopdi(va)]) == 0 )
- return NULL;
- if ( (ctxt[cpu].flags & VGCF_HVM_GUEST) && paging_enabled(&ctxt[cpu]) )
- pde = page_array[pde >> PAGE_SHIFT] << PAGE_SHIFT;
- if ( pde != pde_phys[cpu] )
- {
- pde_phys[cpu] = pde;
- if ( pde_virt[cpu] )
- munmap(pde_virt[cpu], PAGE_SIZE);
- pde_virt[cpu] = xc_map_foreign_range(
- xc_handle, current_domid, PAGE_SIZE, PROT_READ,
- pde_phys[cpu] >> PAGE_SHIFT);
- if ( pde_virt[cpu] == NULL )
- return NULL;
- }
- if ( (page = pde_virt[cpu][vtopti(va)]) == 0 )
- return NULL;
- } else {
- page = va;
- }
- if (ctxt[cpu].flags & VGCF_HVM_GUEST)
- page = page_array[page >> PAGE_SHIFT] << PAGE_SHIFT;
- if ( (page != page_phys[cpu]) || (perm != prev_perm[cpu]) )
- {
- page_phys[cpu] = page;
- if ( page_virt[cpu] )
- munmap(page_virt[cpu], PAGE_SIZE);
- page_virt[cpu] = xc_map_foreign_range(
- xc_handle, current_domid, PAGE_SIZE, perm,
- page_phys[cpu] >> PAGE_SHIFT);
- if ( page_virt[cpu] == NULL )
- {
- page_phys[cpu] = 0;
- return NULL;
- }
- prev_perm[cpu] = perm;
- }
+ if (!paging_enabled(&ctxt[cpu])) {
+ static void * v;
+ unsigned long page;
- return (void *)(((unsigned long)page_virt[cpu]) | (va & BSD_PAGE_MASK));
-}
+ if ( v != NULL )
+ munmap(v, PAGE_SIZE);
+
+ page = page_array[va >> PAGE_SHIFT] << PAGE_SHIFT;
+ v = xc_map_foreign_range( xc_handle, current_domid, PAGE_SIZE,
+ perm, page >> PAGE_SHIFT);
+
+ if ( v == NULL )
+ return NULL;
+
+ return (void *)(((unsigned long)v) | (va & BSD_PAGE_MASK));
+ }
+#ifdef __x86_64__
+ if ( mode == MODE_64 )
+ return map_domain_va_64(xc_handle, cpu, guest_va, perm);
#endif
+ if ( mode == MODE_PAE )
+ return map_domain_va_pae(xc_handle, cpu, guest_va, perm);
+ /* else ( mode == MODE_32 ) */
+ return map_domain_va_32(xc_handle, cpu, guest_va, perm);
+}
static int
__xc_waitdomain(
break;
case PTRACE_SETREGS:
- if (!current_isfile)
+ if (current_isfile)
goto out_unspported; /* XXX not yet supported */
SET_XC_REGS(((struct gdb_regs *)data), ctxt[cpu].user_regs);
if ((retval = xc_vcpu_setcontext(xc_handle, current_domid, cpu,
case PTRACE_CONT:
case PTRACE_DETACH:
- if (!current_isfile)
+ if (current_isfile)
goto out_unspported; /* XXX not yet supported */
if ( request != PTRACE_SINGLESTEP )
{
migrate_timer(&(VLAPIC(v)->vlapic_timer), v->processor);
}
-struct vmx_store_cpu_guest_regs_callback_info {
+struct vmx_cpu_guest_regs_callback_info {
struct vcpu *v;
struct cpu_user_regs *regs;
unsigned long *crs;
static void vmx_store_cpu_guest_regs(
struct vcpu *v, struct cpu_user_regs *regs, unsigned long *crs);
+static void vmx_load_cpu_guest_regs(
+ struct vcpu *v, struct cpu_user_regs *regs);
+
static void vmx_store_cpu_guest_regs_callback(void *data)
{
- struct vmx_store_cpu_guest_regs_callback_info *info = data;
+ struct vmx_cpu_guest_regs_callback_info *info = data;
vmx_store_cpu_guest_regs(info->v, info->regs, info->crs);
}
+/*
+ * Cross-CPU callback (dispatched via on_selected_cpus()): unpack the
+ * info struct and load the register snapshot on the CPU that owns the
+ * target VCPU's VMCS.
+ */
+static void vmx_load_cpu_guest_regs_callback(void *data)
+{
+ struct vmx_cpu_guest_regs_callback_info *info = data;
+ vmx_load_cpu_guest_regs(info->v, info->regs);
+}
+
static void vmx_store_cpu_guest_regs(
struct vcpu *v, struct cpu_user_regs *regs, unsigned long *crs)
{
if ( v->arch.hvm_vmx.launch_cpu != smp_processor_id() )
{
/* Get register details from remote CPU. */
- struct vmx_store_cpu_guest_regs_callback_info info = {
+ struct vmx_cpu_guest_regs_callback_info info = {
.v = v, .regs = regs, .crs = crs };
cpumask_t cpumask = cpumask_of_cpu(v->arch.hvm_vmx.launch_cpu);
on_selected_cpus(cpumask, vmx_store_cpu_guest_regs_callback,
void vmx_load_cpu_guest_regs(struct vcpu *v, struct cpu_user_regs *regs)
{
+ if ( v != current )
+ {
+ /* Non-current VCPUs must be paused to set the register snapshot. */
+ ASSERT(atomic_read(&v->pausecnt) != 0);
+
+ if ( v->arch.hvm_vmx.launch_cpu != smp_processor_id() )
+ {
+ struct vmx_cpu_guest_regs_callback_info info = {
+ .v = v, .regs = regs };
+ cpumask_t cpumask = cpumask_of_cpu(v->arch.hvm_vmx.launch_cpu);
+ on_selected_cpus(cpumask, vmx_load_cpu_guest_regs_callback,
+ &info, 1, 1);
+ return;
+ }
+
+ /* Register details are on this CPU. Load the correct VMCS. */
+ __vmptrld(virt_to_maddr(v->arch.hvm_vmx.vmcs));
+ }
+
+ ASSERT(v->arch.hvm_vmx.launch_cpu == smp_processor_id());
+
#if defined (__x86_64__)
__vmwrite(GUEST_SS_SELECTOR, regs->ss);
+ __vmwrite(GUEST_DS_SELECTOR, regs->ds);
+ __vmwrite(GUEST_ES_SELECTOR, regs->es);
+ __vmwrite(GUEST_GS_SELECTOR, regs->gs);
+ __vmwrite(GUEST_FS_SELECTOR, regs->fs);
__vmwrite(GUEST_RSP, regs->rsp);
__vmwrite(GUEST_RFLAGS, regs->rflags);
__vmwrite(GUEST_RIP, regs->rip);
#elif defined (__i386__)
__vmwrite(GUEST_SS_SELECTOR, regs->ss);
+ __vmwrite(GUEST_DS_SELECTOR, regs->ds);
+ __vmwrite(GUEST_ES_SELECTOR, regs->es);
+ __vmwrite(GUEST_GS_SELECTOR, regs->gs);
+ __vmwrite(GUEST_FS_SELECTOR, regs->fs);
+
__vmwrite(GUEST_RSP, regs->esp);
__vmwrite(GUEST_RFLAGS, regs->eflags);
__vmwrite(GUEST_CS_SELECTOR, regs->cs);
__vmwrite(GUEST_RIP, regs->eip);
-#else
-#error Unsupported architecture
#endif
-}
-void vmx_modify_guest_state(struct vcpu *v)
-{
- modify_vmcs(&v->arch.hvm_vmx, &v->arch.guest_context.user_regs);
+ /* Reload current VCPU's VMCS if it was temporarily unloaded. */
+ if ( (v != current) && hvm_guest(current) )
+ __vmptrld(virt_to_maddr(current->arch.hvm_vmx.vmcs));
}
int vmx_realmode(struct vcpu *v)
hvm_funcs.store_cpu_guest_regs = vmx_store_cpu_guest_regs;
hvm_funcs.load_cpu_guest_regs = vmx_load_cpu_guest_regs;
- hvm_funcs.modify_guest_state = vmx_modify_guest_state;
-
hvm_funcs.realmode = vmx_realmode;
hvm_funcs.paging_enabled = vmx_paging_enabled;
hvm_funcs.instruction_length = vmx_instruction_length;